From fc52eee3386726a0aa4263d2689e6020c6de801f Mon Sep 17 00:00:00 2001 From: "djm@kirby.fc.hp.com" Date: Thu, 22 Sep 2005 06:59:57 -0600 Subject: [PATCH] Remove all CONFIG_VTI, VTI now works dynamically 1.remove vcpu_set_regs and element regs,which are never used 2.remove ia64_prepare_handle_privop,ia64_prepare_handle_break, ia64_prepare_handle_reflection, which are never used. 3.modify related macros for adapting to three level physical to machine table 4.remove all CONFIG_VTI 5.merge ia64_switch_to Signed-off-by: Anthony Xu --- xen/arch/ia64/Makefile | 7 +----- xen/arch/ia64/Rules.mk | 6 ++--- xen/arch/ia64/asm-offsets.c | 4 ---- xen/arch/ia64/asm-xsi-offsets.c | 2 -- xen/arch/ia64/linux-xen/entry.S | 25 ++++++++++++++++----- xen/arch/ia64/linux-xen/head.S | 6 ++--- xen/arch/ia64/linux-xen/unaligned.c | 22 +++++++++---------- xen/arch/ia64/vmx/mm.c | 4 +--- xen/arch/ia64/vmx/vmmu.c | 3 ++- xen/arch/ia64/vmx/vmx_hypercall.c | 13 ++++++----- xen/arch/ia64/vmx/vmx_init.c | 14 +++++++----- xen/arch/ia64/vmx/vmx_irq_ia64.c | 2 -- xen/arch/ia64/vmx/vmx_process.c | 3 ++- xen/arch/ia64/xen/domain.c | 33 +++++++--------------------- xen/arch/ia64/xen/grant_table.c | 2 -- xen/arch/ia64/xen/hypercall.c | 2 -- xen/arch/ia64/xen/privop.c | 1 - xen/arch/ia64/xen/process.c | 16 +++++++------- xen/arch/ia64/xen/regionreg.c | 2 +- xen/arch/ia64/xen/vcpu.c | 4 ---- xen/arch/ia64/xen/xenmem.c | 6 ----- xen/arch/ia64/xen/xenmisc.c | 34 ++++++++++++++--------------- xen/include/asm-ia64/config.h | 6 ++--- xen/include/asm-ia64/domain.h | 2 -- xen/include/asm-ia64/ia64_int.h | 2 +- xen/include/asm-ia64/mm.h | 22 +++++++------------ xen/include/asm-ia64/privop.h | 3 --- xen/include/asm-ia64/vmx_vcpu.h | 13 +---------- xen/include/asm-ia64/xensystem.h | 13 ++++++----- xen/include/public/arch-ia64.h | 4 ---- 30 files changed, 110 insertions(+), 166 deletions(-) diff --git a/xen/arch/ia64/Makefile b/xen/arch/ia64/Makefile index 60e31a170c..3eabd0b7ed 100644 --- a/xen/arch/ia64/Makefile +++ 
b/xen/arch/ia64/Makefile @@ -12,15 +12,10 @@ OBJS = xensetup.o setup.o time.o irq.o ia64_ksyms.o process.o smp.o \ irq_ia64.o irq_lsapic.o vhpt.o xenasm.o hyperprivop.o dom_fw.o \ grant_table.o sn_console.o -# TMP holder to contain *.0 moved out of CONFIG_VTI -OBJS += vmx_init.o - -ifeq ($(CONFIG_VTI),y) -OBJS += vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\ +OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\ vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \ vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o vmx_support.o \ pal_emul.o vmx_irq_ia64.o -endif # lib files from xen/arch/ia64/linux/ (linux/arch/ia64/lib) OBJS += bitop.o clear_page.o flush.o copy_page_mck.o \ diff --git a/xen/arch/ia64/Rules.mk b/xen/arch/ia64/Rules.mk index d1a1dc4a95..8eb31cc674 100644 --- a/xen/arch/ia64/Rules.mk +++ b/xen/arch/ia64/Rules.mk @@ -1,7 +1,7 @@ ######################################## # ia64-specific definitions -CONFIG_VTI ?= n +VALIDATE_VT ?= n ifneq ($(COMPILE_ARCH),$(TARGET_ARCH)) CROSS_COMPILE ?= /usr/local/sp_env/v2.2.5/i686/bin/ia64-unknown-linux- endif @@ -27,7 +27,7 @@ CFLAGS += -Wno-pointer-arith -Wredundant-decls CFLAGS += -DIA64 -DXEN -DLINUX_2_6 CFLAGS += -ffixed-r13 -mfixed-range=f12-f15,f32-f127 CFLAGS += -w -g -ifeq ($(CONFIG_VTI),y) -CFLAGS += -DCONFIG_VTI +ifeq ($(VALIDATE_VT),y) +CFLAGS += -DVALIDATE_VT endif LDFLAGS := -g diff --git a/xen/arch/ia64/asm-offsets.c b/xen/arch/ia64/asm-offsets.c index ad0fbcf9b0..8c1dfe6d6c 100644 --- a/xen/arch/ia64/asm-offsets.c +++ b/xen/arch/ia64/asm-offsets.c @@ -9,10 +9,8 @@ #include #include #include -#ifdef CONFIG_VTI #include #include -#endif // CONFIG_VTI #define task_struct vcpu @@ -222,14 +220,12 @@ void foo(void) BLANK(); -#ifdef CONFIG_VTI DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.privregs)); DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0])); DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta)); DEFINE(XXX_THASH_SIZE, 
sizeof (thash_data_t)); BLANK(); -#endif //CONFIG_VTI //DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip)); //DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp)); //DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr)); diff --git a/xen/arch/ia64/asm-xsi-offsets.c b/xen/arch/ia64/asm-xsi-offsets.c index 80d5809ffd..6a2c6dfa40 100755 --- a/xen/arch/ia64/asm-xsi-offsets.c +++ b/xen/arch/ia64/asm-xsi-offsets.c @@ -32,10 +32,8 @@ #include #include #include -#ifdef CONFIG_VTI #include #include -#endif // CONFIG_VTI #define task_struct vcpu diff --git a/xen/arch/ia64/linux-xen/entry.S b/xen/arch/ia64/linux-xen/entry.S index 22f68b8950..c4f2448942 100644 --- a/xen/arch/ia64/linux-xen/entry.S +++ b/xen/arch/ia64/linux-xen/entry.S @@ -223,9 +223,20 @@ GLOBAL_ENTRY(ia64_switch_to) #else mov IA64_KR(CURRENT)=in0 // update "current" application register #endif +#ifdef XEN //for VTI domain current is save to 21 of bank0 + ;; + bsw.0 + ;; mov r8=r13 // return pointer to previously running task mov r13=in0 // set "current" pointer + mov r21=in0 + ;; + bsw.1 ;; +#else + mov r8=r13 // return pointer to previously running task + mov r13=in0 // set "current" pointer +#endif DO_LOAD_SWITCH_STACK #ifdef CONFIG_SMP @@ -632,12 +643,14 @@ GLOBAL_ENTRY(ia64_ret_from_clone) #ifdef XEN // new domains are cloned but not exec'ed so switch to user mode here cmp.ne pKStk,pUStk=r0,r0 -#ifdef CONFIG_VTI - br.cond.spnt ia64_leave_hypervisor -#else // CONFIG_VTI - br.cond.spnt ia64_leave_kernel -#endif // CONFIG_VTI - + adds r16 = IA64_VCPU_FLAGS_OFFSET, r13 + ;; + ld8 r16 = [r16] + ;; + cmp.ne p6,p7 = r16, r0 + (p6) br.cond.spnt ia64_leave_hypervisor + (p7) br.cond.spnt ia64_leave_kernel + ;; // adds r16 = IA64_VCPU_FLAGS_OFFSET, r13 // ;; // ld8 r16 = [r16] diff --git a/xen/arch/ia64/linux-xen/head.S b/xen/arch/ia64/linux-xen/head.S index 8208e899f0..9da143d979 100644 --- a/xen/arch/ia64/linux-xen/head.S +++ 
b/xen/arch/ia64/linux-xen/head.S @@ -259,7 +259,7 @@ start_ap: /* * Switch into virtual mode: */ -#if defined(XEN) && defined(CONFIG_VTI) +#if defined(XEN) && defined(VALIDATE_VT) movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH\ |IA64_PSR_DI) #else @@ -284,7 +284,7 @@ start_ap: ;; // set IVT entry point---can't access I/O ports without it -#if defined(XEN) && defined(CONFIG_VTI) +#if defined(XEN) && defined(VALIDATE_VT) movl r3=vmx_ia64_ivt #else movl r3=ia64_ivt @@ -356,7 +356,7 @@ start_ap: .load_current: // load the "current" pointer (r13) and ar.k6 with the current task -#if defined(XEN) && defined(CONFIG_VTI) +#if defined(XEN) && defined(VALIDATE_VT) mov r21=r2 ;; bsw.1 diff --git a/xen/arch/ia64/linux-xen/unaligned.c b/xen/arch/ia64/linux-xen/unaligned.c index 29f412ce64..8b1cf71eec 100644 --- a/xen/arch/ia64/linux-xen/unaligned.c +++ b/xen/arch/ia64/linux-xen/unaligned.c @@ -201,12 +201,11 @@ static u16 gr_info[32]={ RPT(r1), RPT(r2), RPT(r3), -//#if defined(XEN) && defined(CONFIG_VTI) #if defined(XEN) RPT(r4), RPT(r5), RPT(r6), RPT(r7), -#else //CONFIG_VTI +#else RSW(r4), RSW(r5), RSW(r6), RSW(r7), -#endif //CONFIG_VTI +#endif RPT(r8), RPT(r9), RPT(r10), RPT(r11), RPT(r12), RPT(r13), RPT(r14), RPT(r15), @@ -296,7 +295,6 @@ rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg) return reg; } -//#if defined(XEN) && defined(CONFIG_VTI) #if defined(XEN) void set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat) @@ -414,7 +412,7 @@ get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, unsigne } } -#else // CONFIG_VTI +#else static void set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat) { @@ -559,7 +557,7 @@ get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *na *nat = 0; return; } -#endif // CONFIG_VTI +#endif #ifdef XEN @@ -595,11 +593,11 @@ setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs 
*regs) unat = &sw->ar_unat; } else { addr = (unsigned long)regs; -#if defined(XEN) && defined(CONFIG_VTI) +#if defined(XEN) unat = ®s->eml_unat; -#else //CONFIG_VTI +#else unat = &sw->caller_unat; -#endif //CONFIG_VTI +#endif } DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n", addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum)); @@ -785,11 +783,11 @@ getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs unat = &sw->ar_unat; } else { addr = (unsigned long)regs; -#if defined(XEN) && defined(CONFIG_VTI) +#if defined(XEN) unat = ®s->eml_unat;; -#else //CONFIG_VTI +#else unat = &sw->caller_unat; -#endif //CONFIG_VTI +#endif } DPRINT("addr_base=%lx offset=0x%x\n", addr, GR_OFFS(regnum)); diff --git a/xen/arch/ia64/vmx/mm.c b/xen/arch/ia64/vmx/mm.c index 7af69ebe08..4d3ed238fa 100644 --- a/xen/arch/ia64/vmx/mm.c +++ b/xen/arch/ia64/vmx/mm.c @@ -100,8 +100,7 @@ uregs->ptr is virtual address uregs->val is pte value */ -#ifdef CONFIG_VTI -int do_mmu_update(mmu_update_t *ureqs,u64 count,u64 *pdone,u64 foreigndom) +int vmx_do_mmu_update(mmu_update_t *ureqs,u64 count,u64 *pdone,u64 foreigndom) { int i,cmd; u64 mfn, gpfn; @@ -149,4 +148,3 @@ int do_mmu_update(mmu_update_t *ureqs,u64 count,u64 *pdone,u64 foreigndom) } return 0; } -#endif diff --git a/xen/arch/ia64/vmx/vmmu.c b/xen/arch/ia64/vmx/vmmu.c index 9a00754366..99e50cc50b 100644 --- a/xen/arch/ia64/vmx/vmmu.c +++ b/xen/arch/ia64/vmx/vmmu.c @@ -220,6 +220,7 @@ thash_cb_t *init_domain_tlb(struct vcpu *d) * by control panel. Dom0 has gpfn identical to mfn, which doesn't need * this interface at all. */ +#if 0 void alloc_pmt(struct domain *d) { @@ -234,7 +235,7 @@ alloc_pmt(struct domain *d) d->arch.pmt = page_to_virt(page); memset(d->arch.pmt, 0x55, d->max_pages * 8); } - +#endif /* * Insert guest TLB to machine TLB. 
* data: In TLB format diff --git a/xen/arch/ia64/vmx/vmx_hypercall.c b/xen/arch/ia64/vmx/vmx_hypercall.c index 6414b01a1d..727a145033 100644 --- a/xen/arch/ia64/vmx/vmx_hypercall.c +++ b/xen/arch/ia64/vmx/vmx_hypercall.c @@ -47,11 +47,13 @@ void hyper_mmu_update(void) vcpu_get_gr_nat(vcpu,17,&r33); vcpu_get_gr_nat(vcpu,18,&r34); vcpu_get_gr_nat(vcpu,19,&r35); - ret=do_mmu_update((mmu_update_t*)r32,r33,r34,r35); + ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,r34,r35); vcpu_set_gr(vcpu, 8, ret, 0); vmx_vcpu_increment_iip(vcpu); } - +/* turn off temporarily, we will merge hypercall parameter convention with xeno, when + VTI domain need to call hypercall */ +#if 0 unsigned long __hypercall_create_continuation( unsigned int op, unsigned int nr_args, ...) { @@ -87,7 +89,7 @@ unsigned long __hypercall_create_continuation( va_end(args); return op; } - +#endif void hyper_dom_mem_op(void) { VCPU *vcpu=current; @@ -184,14 +186,13 @@ void hyper_lock_page(void) static int do_set_shared_page(VCPU *vcpu, u64 gpa) { - u64 shared_info, o_info; + u64 o_info; struct domain *d = vcpu->domain; struct vcpu *v; if(vcpu->domain!=dom0) return -EPERM; - shared_info = __gpa_to_mpa(vcpu->domain, gpa); o_info = (u64)vcpu->domain->shared_info; - d->shared_info= (shared_info_t *)__va(shared_info); + d->shared_info= (shared_info_t *)domain_mpa_to_imva(vcpu->domain, gpa); /* Copy existing shared info into new page */ if (o_info) { diff --git a/xen/arch/ia64/vmx/vmx_init.c b/xen/arch/ia64/vmx/vmx_init.c index a5624eb3fd..d6b9d24f03 100644 --- a/xen/arch/ia64/vmx/vmx_init.c +++ b/xen/arch/ia64/vmx/vmx_init.c @@ -163,7 +163,8 @@ void vmx_setup_platform(struct vcpu *v, struct vcpu_guest_context *c) } /* FIXME: only support PMT table continuously by far */ - d->arch.pmt = __va(c->pt_base); +// d->arch.pmt = __va(c->pt_base); + vmx_final_setup_domain(d); } @@ -209,7 +210,6 @@ static vpd_t *alloc_vpd(void) } -#ifdef CONFIG_VTI /* * Create a VP on intialized VMX environment. 
*/ @@ -333,7 +333,6 @@ vmx_change_double_mapping(struct vcpu *v, u64 oldrr7, u64 newrr7) pte_xen, pte_vhpt); } #endif // XEN_DBL_MAPPING -#endif // CONFIG_VTI /* * Initialize VMX envirenment for guest. Only the 1st vp/vcpu @@ -355,7 +354,11 @@ vmx_final_setup_domain(struct domain *d) v->arch.privregs = vpd; vpd->virt_env_vaddr = vm_buffer; -#ifdef CONFIG_VTI + /* Per-domain vTLB and vhpt implementation. Now vmx domain will stick + * to this solution. Maybe it can be deferred until we know created + * one as vmx domain */ + v->arch.vtlb = init_domain_tlb(v); + /* v->arch.schedule_tail = arch_vmx_do_launch; */ vmx_create_vp(v); @@ -369,7 +372,6 @@ vmx_final_setup_domain(struct domain *d) vlsapic_reset(v); vtm_init(v); -#endif /* Other vmx specific initialization work */ } @@ -483,7 +485,7 @@ int vmx_alloc_contig_pages(struct domain *d) for (j = io_ranges[i].start; j < io_ranges[i].start + io_ranges[i].size; j += PAGE_SIZE) - map_domain_io_page(d, j); + map_domain_page(d, j, io_ranges[i].type); } set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags); diff --git a/xen/arch/ia64/vmx/vmx_irq_ia64.c b/xen/arch/ia64/vmx/vmx_irq_ia64.c index 19827329b2..84af87da41 100644 --- a/xen/arch/ia64/vmx/vmx_irq_ia64.c +++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c @@ -36,7 +36,6 @@ #define IRQ_DEBUG 0 -#ifdef CONFIG_VTI #define vmx_irq_enter() \ add_preempt_count(HARDIRQ_OFFSET); @@ -130,4 +129,3 @@ vmx_ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) if ( wake_dom0 && current != dom0 ) vcpu_wake(dom0->vcpu[0]); } -#endif diff --git a/xen/arch/ia64/vmx/vmx_process.c b/xen/arch/ia64/vmx/vmx_process.c index 824ea618b1..055687c9a3 100644 --- a/xen/arch/ia64/vmx/vmx_process.c +++ b/xen/arch/ia64/vmx/vmx_process.c @@ -314,11 +314,12 @@ void vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs) // prepare_if_physical_mode(v); if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){ - if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain, data->ppn>>(PAGE_SHIFT-12))){ + 
if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,data->ppn>>(PAGE_SHIFT-12))){ vadr=(vadr&((1UL<ps)-1))+(data->ppn>>(data->ps-12)<ps); emulate_io_inst(v, vadr, data->ma); return IA64_FAULT; } + if ( data->ps != vrr.ps ) { machine_tlb_insert(v, data); } diff --git a/xen/arch/ia64/xen/domain.c b/xen/arch/ia64/xen/domain.c index 97b63af658..4cd54a0b8c 100644 --- a/xen/arch/ia64/xen/domain.c +++ b/xen/arch/ia64/xen/domain.c @@ -7,7 +7,7 @@ * Copyright (C) 2005 Intel Co * Kun Tian (Kevin Tian) * - * 05/04/29 Kun Tian (Kevin Tian) Add CONFIG_VTI domain support + * 05/04/29 Kun Tian (Kevin Tian) Add VTI domain support */ #include @@ -204,13 +204,6 @@ void arch_do_createdomain(struct vcpu *v) d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME -#ifdef CONFIG_VTI - /* Per-domain vTLB and vhpt implementation. Now vmx domain will stick - * to this solution. Maybe it can be deferred until we know created - * one as vmx domain */ - v->arch.vtlb = init_domain_tlb(v); -#endif - /* We may also need emulation rid for region4, though it's unlikely * to see guest issue uncacheable access in metaphysical mode. But * keep such info here may be more sane. 
@@ -361,7 +354,6 @@ void new_thread(struct vcpu *v, regs->ar_fpsr = FPSR_DEFAULT; if (VMX_DOMAIN(v)) { -#ifdef CONFIG_VTI vmx_init_all_rr(v); if (d == dom0) // VCPU(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L); @@ -369,7 +361,6 @@ void new_thread(struct vcpu *v, /* Virtual processor context setup */ VCPU(v, vpsr) = IA64_PSR_BN; VCPU(v, dcr) = 0; -#endif } else { init_all_rr(v); if (d == dom0) @@ -480,7 +471,7 @@ void map_domain_page(struct domain *d, unsigned long mpaddr, unsigned long physa } else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr); } - +#if 0 /* map a physical address with specified I/O flag */ void map_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags) { @@ -517,7 +508,7 @@ void map_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long fl } else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr); } - +#endif void mpafoo(unsigned long mpaddr) { extern unsigned long privop_trace; @@ -571,7 +562,7 @@ tryagain: } // FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE -#ifndef CONFIG_VTI +#if 1 unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr) { unsigned long pte = lookup_domain_mpa(d,mpaddr); @@ -582,14 +573,14 @@ unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr) imva |= mpaddr & ~PAGE_MASK; return(imva); } -#else // CONFIG_VTI +#else unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr) { unsigned long imva = __gpa_to_mpa(d, mpaddr); return __va(imva); } -#endif // CONFIG_VTI +#endif // remove following line if not privifying in memory //#define HAVE_PRIVIFY_MEMORY @@ -860,7 +851,7 @@ int construct_dom0(struct domain *d, if ( rc != 0 ) return rc; -#ifdef CONFIG_VTI +#ifdef VALIDATE_VT /* Temp workaround */ if (running_on_sim) dsi.xen_section_string = (char *)1; @@ -920,7 +911,7 @@ int construct_dom0(struct domain *d, for ( i = 1; i < MAX_VIRT_CPUS; i++ ) d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1; -#ifdef 
CONFIG_VTI +#ifdef VALIDATE_VT /* Construct a frame-allocation list for the initial domain, since these * pages are allocated by boot allocator and pfns are not set properly */ @@ -938,10 +929,6 @@ int construct_dom0(struct domain *d, machine_to_phys_mapping[mfn] = mfn; } - /* Dom0's pfn is equal to mfn, so there's no need to allocate pmt - * for dom0 - */ - d->arch.pmt = NULL; #endif /* Copy the OS image. */ @@ -1162,12 +1149,8 @@ void vcpu_migrate_cpu(struct vcpu *v, int newcpu) void sync_vcpu_execstate(struct vcpu *v) { ia64_save_fpu(v->arch._thread.fph); -#ifdef CONFIG_VTI if (VMX_DOMAIN(v)) vmx_save_state(v); -#else - if (0) do {} while(0); -#endif else { if (IA64_HAS_EXTRA_STATE(v)) ia64_save_extra(v); diff --git a/xen/arch/ia64/xen/grant_table.c b/xen/arch/ia64/xen/grant_table.c index fd66986e07..2905861dd4 100644 --- a/xen/arch/ia64/xen/grant_table.c +++ b/xen/arch/ia64/xen/grant_table.c @@ -1,4 +1,3 @@ -#ifndef CONFIG_VTI // temporarily in arch/ia64 until can merge into common/grant_table.c /****************************************************************************** * common/grant_table.c @@ -1452,7 +1451,6 @@ grant_table_init( { /* Nothing. 
*/ } -#endif /* * Local variables: diff --git a/xen/arch/ia64/xen/hypercall.c b/xen/arch/ia64/xen/hypercall.c index 79d69db996..c8abf6d115 100644 --- a/xen/arch/ia64/xen/hypercall.c +++ b/xen/arch/ia64/xen/hypercall.c @@ -178,11 +178,9 @@ ia64_hypercall (struct pt_regs *regs) regs->r8 = do_event_channel_op(regs->r14); break; -#ifndef CONFIG_VTI case __HYPERVISOR_grant_table_op: regs->r8 = do_grant_table_op(regs->r14, regs->r15, regs->r16); break; -#endif case __HYPERVISOR_console_io: regs->r8 = do_console_io(regs->r14, regs->r15, regs->r16); diff --git a/xen/arch/ia64/xen/privop.c b/xen/arch/ia64/xen/privop.c index 36fa02831b..def644c30e 100644 --- a/xen/arch/ia64/xen/privop.c +++ b/xen/arch/ia64/xen/privop.c @@ -726,7 +726,6 @@ priv_emulate(VCPU *vcpu, REGS *regs, UINT64 isr) return IA64_ILLOP_FAULT; } //if (isrcode != 1 && isrcode != 2) return 0; - vcpu_set_regs(vcpu,regs); privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT; // its OK for a privified-cover to be executed in user-land fault = priv_handle_op(vcpu,regs,privlvl); diff --git a/xen/arch/ia64/xen/process.c b/xen/arch/ia64/xen/process.c index a58a937947..0e975b668f 100644 --- a/xen/arch/ia64/xen/process.c +++ b/xen/arch/ia64/xen/process.c @@ -67,14 +67,14 @@ void schedule_tail(struct vcpu *next) unsigned long rr7; //printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info); //printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info); -#ifdef CONFIG_VTI /* rr7 will be postponed to last point when resuming back to guest */ - vmx_load_all_rr(current); -#else // CONFIG_VTI - if (rr7 = load_region_regs(current)) { - printk("schedule_tail: change to rr7 not yet implemented\n"); - } -#endif // CONFIG_VTI + if(VMX_DOMAIN(current)){ + vmx_load_all_rr(current); + }else{ + if (rr7 = load_region_regs(current)) { + printk("schedule_tail: change to rr7 not yet implemented\n"); + } + } } void tdpfoo(void) { } @@ -755,7 +755,7 @@ unsigned long __hypercall_create_continuation( { struct mc_state *mcs = 
&mc_state[smp_processor_id()]; VCPU *vcpu = current; - struct cpu_user_regs *regs = vcpu->arch.regs; + struct cpu_user_regs *regs = vcpu_regs(vcpu); unsigned int i; va_list args; diff --git a/xen/arch/ia64/xen/regionreg.c b/xen/arch/ia64/xen/regionreg.c index fa0d119258..418ba8df23 100644 --- a/xen/arch/ia64/xen/regionreg.c +++ b/xen/arch/ia64/xen/regionreg.c @@ -227,7 +227,7 @@ int set_one_rr(unsigned long rr, unsigned long val) return 0; } -#ifdef CONFIG_VTI +#if 0 memrrv.rrval = rrv.rrval; if (rreg == 7) { newrrv.rid = newrid; diff --git a/xen/arch/ia64/xen/vcpu.c b/xen/arch/ia64/xen/vcpu.c index dcce55759e..44c13b1eb6 100644 --- a/xen/arch/ia64/xen/vcpu.c +++ b/xen/arch/ia64/xen/vcpu.c @@ -1977,7 +1977,3 @@ IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 addr_range) return (IA64_ILLOP_FAULT); } -void vcpu_set_regs(VCPU *vcpu, REGS *regs) -{ - vcpu->arch.regs = regs; -} diff --git a/xen/arch/ia64/xen/xenmem.c b/xen/arch/ia64/xen/xenmem.c index 42b83d97a2..31c9ae3b51 100644 --- a/xen/arch/ia64/xen/xenmem.c +++ b/xen/arch/ia64/xen/xenmem.c @@ -28,17 +28,13 @@ static unsigned long num_dma_physpages; /* * Set up the page tables. 
*/ -#ifdef CONFIG_VTI unsigned long *mpt_table; unsigned long mpt_table_size; -#endif // CONFIG_VTI void paging_init (void) { struct pfn_info *pg; - -#ifdef CONFIG_VTI unsigned int mpt_order; /* Create machine to physical mapping table * NOTE: similar to frame table, later we may need virtually @@ -53,8 +49,6 @@ paging_init (void) printk("machine to physical table: 0x%lx\n", (u64)mpt_table); memset(mpt_table, INVALID_M2P_ENTRY, mpt_table_size); -#endif // CONFIG_VTI - /* Other mapping setup */ zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); diff --git a/xen/arch/ia64/xen/xenmisc.c b/xen/arch/ia64/xen/xenmisc.c index 967d6789dd..49a44e0001 100644 --- a/xen/arch/ia64/xen/xenmisc.c +++ b/xen/arch/ia64/xen/xenmisc.c @@ -65,7 +65,7 @@ platform_is_hp_ski(void) void sync_lazy_execstate_cpu(unsigned int cpu) {} -#ifdef CONFIG_VTI +#if 0 int grant_table_create(struct domain *d) { return 0; } void grant_table_destroy(struct domain *d) { return; } #endif @@ -77,7 +77,6 @@ void raise_actimer_softirq(void) raise_softirq(AC_TIMER_SOFTIRQ); } -#ifndef CONFIG_VTI unsigned long __gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn) { @@ -93,7 +92,7 @@ while(1); return ((pte & _PFN_MASK) >> PAGE_SHIFT); } } - +#if 0 u32 __mfn_to_gpfn(struct domain *d, unsigned long frame) { @@ -288,14 +287,14 @@ void context_switch(struct vcpu *prev, struct vcpu *next) //if (prev->domain->domain_id == 1 && next->domain->domain_id == 0) cs10foo(); //if (prev->domain->domain_id == 0 && next->domain->domain_id == 1) cs01foo(); //printk("@@sw %d->%d\n",prev->domain->domain_id,next->domain->domain_id); -#ifdef CONFIG_VTI - vtm_domain_out(prev); -#endif + if(VMX_DOMAIN(prev)){ + vtm_domain_out(prev); + } context_switch_count++; switch_to(prev,next,prev); -#ifdef CONFIG_VTI - vtm_domain_in(current); -#endif + if(VMX_DOMAIN(current)){ + vtm_domain_in(current); + } // leave this debug for now: it acts as a heartbeat when more than // one domain is active @@ -307,16 +306,15 @@ if 
(!cnt[id]--) { printk("%x",id); cnt[id] = 500000; } if (!i--) { printk("+",id); i = 1000000; } } -#ifdef CONFIG_VTI - if (VMX_DOMAIN(current)) + if (VMX_DOMAIN(current)){ vmx_load_all_rr(current); -#else - if (!is_idle_task(current->domain)) { - load_region_regs(current); - if (vcpu_timer_expired(current)) vcpu_pend_timer(current); - } - if (vcpu_timer_expired(current)) vcpu_pend_timer(current); -#endif + }else{ + if (!is_idle_task(current->domain)) { + load_region_regs(current); + if (vcpu_timer_expired(current)) vcpu_pend_timer(current); + } + if (vcpu_timer_expired(current)) vcpu_pend_timer(current); + } } void context_switch_finalise(struct vcpu *next) diff --git a/xen/include/asm-ia64/config.h b/xen/include/asm-ia64/config.h index af5c6893e0..1d3ba8dabd 100644 --- a/xen/include/asm-ia64/config.h +++ b/xen/include/asm-ia64/config.h @@ -199,11 +199,11 @@ void sort_main_extable(void); access_ok(type,addr,count*size)) // see drivers/char/console.c -#ifndef CONFIG_VTI +#ifndef VALIDATE_VT #define OPT_CONSOLE_STR "com1" -#else // CONFIG_VTI +#else #define OPT_CONSOLE_STR "com2" -#endif // CONFIG_VTI +#endif #define __attribute_used__ __attribute__ ((unused)) #define __nocast diff --git a/xen/include/asm-ia64/domain.h b/xen/include/asm-ia64/domain.h index 6a25760c78..6904cf7d21 100644 --- a/xen/include/asm-ia64/domain.h +++ b/xen/include/asm-ia64/domain.h @@ -25,7 +25,6 @@ struct arch_domain { int breakimm; int imp_va_msb; - unsigned long *pmt; /* physical to machine table */ /* System pages out of guest memory, like for xenstore/console */ unsigned long sys_pgnr; unsigned long max_pfn; /* Max pfn including I/O holes */ @@ -62,7 +61,6 @@ struct arch_vcpu { unsigned long xen_itm; unsigned long xen_timer_interval; #endif - void *regs; /* temporary until find a better way to do privops */ mapped_regs_t *privregs; /* save the state of vcpu */ int metaphysical_rr0; // from arch_domain (so is pinned) int metaphysical_rr4; // from arch_domain (so is pinned) diff --git 
a/xen/include/asm-ia64/ia64_int.h b/xen/include/asm-ia64/ia64_int.h index 5808a99be5..c2a1aec0b0 100644 --- a/xen/include/asm-ia64/ia64_int.h +++ b/xen/include/asm-ia64/ia64_int.h @@ -45,7 +45,7 @@ #define IA64_DISIST_FAULT (IA64_GENEX_VECTOR | 0x40) #define IA64_ILLDEP_FAULT (IA64_GENEX_VECTOR | 0x80) #define IA64_DTLB_FAULT (IA64_DATA_TLB_VECTOR) -#define IA64_VHPT_FAULT (IA64_VHPT_TRANS_VECTOR | 0x10) +#define IA64_VHPT_FAULT (IA64_VHPT_TRANS_VECTOR | 0x7) #if !defined(__ASSEMBLY__) typedef unsigned long IA64FAULT; typedef unsigned long IA64INTVECTOR; diff --git a/xen/include/asm-ia64/mm.h b/xen/include/asm-ia64/mm.h index 93acb34776..100400cb5c 100644 --- a/xen/include/asm-ia64/mm.h +++ b/xen/include/asm-ia64/mm.h @@ -134,7 +134,7 @@ void add_to_domain_alloc_list(unsigned long ps, unsigned long pe); static inline void put_page(struct pfn_info *page) { -#ifdef CONFIG_VTI // doesn't work with non-VTI in grant tables yet +#ifdef VALIDATE_VT // doesn't work with non-VTI in grant tables yet u32 nx, x, y = page->count_info; do { @@ -152,7 +152,7 @@ static inline void put_page(struct pfn_info *page) static inline int get_page(struct pfn_info *page, struct domain *domain) { -#ifdef CONFIG_VTI +#ifdef VALIDATE_VT u64 x, nx, y = *((u64*)&page->count_info); u32 _domain = pickle_domptr(domain); @@ -404,7 +404,6 @@ extern unsigned long num_physpages; extern unsigned long totalram_pages; extern int nr_swap_pages; -#ifdef CONFIG_VTI extern unsigned long *mpt_table; #undef machine_to_phys_mapping #define machine_to_phys_mapping mpt_table @@ -415,34 +414,29 @@ extern unsigned long *mpt_table; /* If pmt table is provided by control pannel later, we need __get_user * here. However if it's allocated by HV, we should access it directly */ -#define get_mfn_from_pfn(d, gpfn) \ - ((d) == dom0 ? gpfn : \ - (gpfn <= d->arch.max_pfn ? 
(d)->arch.pmt[(gpfn)] : \ - INVALID_MFN)) #define __mfn_to_gpfn(_d, mfn) \ machine_to_phys_mapping[(mfn)] #define __gpfn_to_mfn(_d, gpfn) \ - get_mfn_from_pfn((_d), (gpfn)) + __gpfn_to_mfn_foreign((_d), (gpfn)) #define __gpfn_invalid(_d, gpfn) \ - (__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK) + (lookup_domain_mpa((_d), ((gpfn)<>PAGE_SHIFT)<>PAGE_SHIFT)< -//#ifdef CONFIG_VTI #include -//#else //CONFIG_VTI #include -//#endif //CONFIG_VTI typedef unsigned long IA64_INST; diff --git a/xen/include/asm-ia64/vmx_vcpu.h b/xen/include/asm-ia64/vmx_vcpu.h index 6f99a392a6..f0729b3e0a 100644 --- a/xen/include/asm-ia64/vmx_vcpu.h +++ b/xen/include/asm-ia64/vmx_vcpu.h @@ -62,7 +62,7 @@ extern u64 set_isr_ei_ni (VCPU *vcpu); extern u64 set_isr_for_na_inst(VCPU *vcpu, int op); -/* next all for CONFIG_VTI APIs definition */ +/* next all for VTI domain APIs definition */ extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value); extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value); extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, UINT64 value); @@ -252,12 +252,9 @@ IA64FAULT vmx_vcpu_set_itm(VCPU *vcpu, u64 val) { vtime_t *vtm; - vtm=&(vcpu->arch.arch_vmx.vtm); VCPU(vcpu,itm)=val; -#ifdef CONFIG_VTI vtm_interruption_update(vcpu, vtm); -#endif return IA64_NO_FAULT; } static inline @@ -292,9 +289,7 @@ static inline IA64FAULT vmx_vcpu_set_eoi(VCPU *vcpu, u64 val) { -#ifdef CONFIG_VTI guest_write_eoi(vcpu); -#endif return IA64_NO_FAULT; } @@ -304,9 +299,7 @@ vmx_vcpu_set_itv(VCPU *vcpu, u64 val) { VCPU(vcpu,itv)=val; -#ifdef CONFIG_VTI vtm_set_itv(vcpu); -#endif return IA64_NO_FAULT; } static inline @@ -347,17 +340,13 @@ vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val) static inline IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val) { -#ifdef CONFIG_VTI vtm_set_itc(vcpu, val); -#endif return IA64_NO_FAULT; } static inline IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu,UINT64 *val) { -#ifdef CONFIG_VTI *val = vtm_get_itc(vcpu); -#endif return IA64_NO_FAULT; } static inline diff --git 
a/xen/include/asm-ia64/xensystem.h b/xen/include/asm-ia64/xensystem.h index 95284f8a77..d204141fcb 100644 --- a/xen/include/asm-ia64/xensystem.h +++ b/xen/include/asm-ia64/xensystem.h @@ -34,7 +34,7 @@ #define IA64_HAS_EXTRA_STATE(t) 0 #undef __switch_to -#ifdef CONFIG_VTI +#if 1 extern struct task_struct *vmx_ia64_switch_to (void *next_task); #define __switch_to(prev,next,last) do { \ ia64_save_fpu(prev->arch._thread.fph); \ @@ -51,10 +51,13 @@ extern struct task_struct *vmx_ia64_switch_to (void *next_task); if (IA64_HAS_EXTRA_STATE(next)) \ ia64_save_extra(next); \ } \ - ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \ - (last) = vmx_ia64_switch_to((next)); \ + /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/ \ + (last) = ia64_switch_to((next)); \ + if (!VMX_DOMAIN(current)){ \ + vcpu_set_next_timer(current); \ + } \ } while (0) -#else // CONFIG_VTI +#else #define __switch_to(prev,next,last) do { \ ia64_save_fpu(prev->arch._thread.fph); \ ia64_load_fpu(next->arch._thread.fph); \ @@ -66,7 +69,7 @@ extern struct task_struct *vmx_ia64_switch_to (void *next_task); (last) = ia64_switch_to((next)); \ vcpu_set_next_timer(current); \ } while (0) -#endif // CONFIG_VTI +#endif #undef switch_to // FIXME SMP... see system.h, does this need to be different? diff --git a/xen/include/public/arch-ia64.h b/xen/include/public/arch-ia64.h index d25ec18fcf..e6e5c48f8e 100644 --- a/xen/include/public/arch-ia64.h +++ b/xen/include/public/arch-ia64.h @@ -232,13 +232,9 @@ typedef struct { // FIXME: tmp[8] temp'ly being used for virtual psr.pp }; }; -#if 0 -#ifdef CONFIG_VTI unsigned long reserved6[3456]; unsigned long vmm_avail[128]; unsigned long reserved7[4096]; -#endif -#endif } mapped_regs_t; typedef struct { -- 2.30.2